// Review note: this hunk reorders the guest-VHPT validity checks.
// Old order: load pta; test pta.vf (long format) -> page_fault; test pta.ve.
// New order: load pta; test rr.ve first; then pta.ve; then pta.vf.
1: // check the guest VHPT
// r18 presumably holds the XSI_PSR_IC base of the shared-info area -- confirm.
// r19 = address of the guest PTA slot, then (after the ld8) the PTA value.
adds r19 = XSI_PTA_OFS-XSI_PSR_IC_OFS, r18;;
- ld8 r19=[r19];;
- tbit.nz p7,p0=r19,IA64_PTA_VF_BIT;; // long format VHPT
-(p7) br.cond.spnt.few page_fault;;
// note: the replacement drops the trailing stop; r19 is not consumed until
// after the stops below, so the dependency is still honored
+ ld8 r19=[r19]
// if (!rr.ve || !(pta & IA64_PTA_VE)) take slow way for now
// FIXME: later, we deliver an alt_d/i vector after thash and itir
- tbit.z p7,p0=r19,IA64_PTA_VE_BIT
-(p7) br.cond.spnt.few page_fault;;
// r25 = region number = bits 63:61 of the faulting address in r17
extr.u r25=r17,61,3
// r21 = address of the guest's RR0 slot in the shared-info area
adds r21=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
shl r25=r25,3;; // scale region number by 8 -- presumably an RR-array byte offset used past this hunk; confirm
ld8 r22=[r21];;
// rr.ve (bit 0) clear -> VHPT walker disabled for this region: take slow path
tbit.z p7,p0=r22,0
(p7) br.cond.spnt.few page_fault;;
// pta.ve clear -> VHPT disabled entirely: take slow path
+ tbit.z p7,p0=r19,IA64_PTA_VE_BIT
+(p7) br.cond.spnt.few page_fault;;
// pta.vf set -> long-format VHPT, not handled by this fast path: slow path
+ tbit.nz p7,p0=r19,IA64_PTA_VF_BIT // long format VHPT
+(p7) br.cond.spnt.few page_fault;;
// compute and save away itir (r22 & RR_PS_MASK)
movl r21=IA64_ITIR_PS_MASK;;
/* PTA register field bit positions (sz, vf, base). */
#define IA64_PTA_SZ_BIT 2
#define IA64_PTA_VF_BIT 8
#define IA64_PTA_BASE_BIT 15
/* Review note: the IA64_PTA_LFMT mask is removed by this hunk; its only
 * visible user (the long-format rejection in vcpu_set_pta) is also deleted. */
-#define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
/* Continuation of a multi-line macro -- its definition runs past this hunk. */
#define IA64_PSR_NON_VIRT_BITS \
/*
 * vcpu_set_pta: validate and install a guest PTA value.
 * Review note: this hunk deletes the up-front rejection of long-format
 * (pta.vf / LFMT) values, so a long-format PTA is now accepted here and
 * the fault/translate paths take responsibility for it instead.
 * (Function body is truncated in this hunk.)
 */
IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val)
{
- if (val & IA64_PTA_LFMT) {
- printk("*** No support for VHPT long format yet!!\n");
- return IA64_ILLOP_FAULT;
- }
/* bits 14:9 reserved -- TODO confirm mask/shift against the architecture manual */
if (val & (0x3f << 9)) /* reserved fields */
return IA64_RSVDREG_FAULT;
if (val & 2) /* reserved fields */
/* check guest VHPT */
/* (interior of a translate/miss-resolution routine; header not in view) */
pta = PSCB(vcpu, pta);
/*
 * Review note: the old code panicked the domain on a long-format VHPT.
 * The replacement (added further down in this hunk, after itir is set up)
 * instead returns the plain TLB-miss vector so the guest resolves the
 * miss itself.
 */
- if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
- panic_domain(vcpu_regs(vcpu), "can't do long format VHPT\n");
- //return is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR;
- }
/* itir = RID and page-size fields of the region register */
*itir = rr & (RR_RID_MASK | RR_PS_MASK);
// note: architecturally, iha is optionally set for alt faults but
IA64_ALT_INST_TLB_VECTOR;
}
/*
 * Long-format VHPT: the short-format walker below cannot search it, so
 * deliver the ordinary TLB-miss vector and let the guest handle the miss.
 */
+ if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
+ /*
+ * minimal support: vhpt walker is really dumb and won't find
+ * anything
+ */
+ return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
+ }
/* avoid recursively walking (short format) VHPT */
if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;